From 58ff34519272ac5c51583dc67373269f47868541 Mon Sep 17 00:00:00 2001 From: "kaf24@scramble.cl.cam.ac.uk" Date: Wed, 25 Feb 2004 16:47:26 +0000 Subject: [PATCH] bitkeeper revision 1.749.1.1 (403cd19e9cL26IazEdGTvx0tHxbYqw) event_channel.h: new file sched.h, hypervisor-if.h, event_channel.c, domain.c, apic.c: Rewritten event-channel code. --- .rootkeys | 1 + xen/arch/i386/apic.c | 23 +- xen/common/domain.c | 7 +- xen/common/event_channel.c | 379 ++++++++++++--------- xen/include/hypervisor-ifs/event_channel.h | 93 +++++ xen/include/hypervisor-ifs/hypervisor-if.h | 15 - xen/include/xeno/sched.h | 13 +- 7 files changed, 342 insertions(+), 189 deletions(-) create mode 100644 xen/include/hypervisor-ifs/event_channel.h diff --git a/.rootkeys b/.rootkeys index 788428d145..9e80ed605d 100644 --- a/.rootkeys +++ b/.rootkeys @@ -417,6 +417,7 @@ 3ddb79c2YTaZwOqWin9-QNgHge5RVw xen/include/hypervisor-ifs/block.h 3ddb79c2PMeWTK86y4C3F4MzHw4A1g xen/include/hypervisor-ifs/dom0_ops.h 3e6377eaioRoNm0m_HSDEAd4Vqrq_w xen/include/hypervisor-ifs/dom_mem_ops.h +403cd194j2pyLqXD8FJ-ukvZzkPenw xen/include/hypervisor-ifs/event_channel.h 3ddb79c25UE59iu4JJcbRalx95mvcg xen/include/hypervisor-ifs/hypervisor-if.h 3ead095dE_VF-QA88rl_5cWYRWtRVQ xen/include/hypervisor-ifs/kbd.h 3ddb79c2oRPrzClk3zbTkRHlpumzKA xen/include/hypervisor-ifs/network.h diff --git a/xen/arch/i386/apic.c b/xen/arch/i386/apic.c index 469890b488..e622cc4104 100644 --- a/xen/arch/i386/apic.c +++ b/xen/arch/i386/apic.c @@ -659,7 +659,6 @@ void __init setup_APIC_clocks (void) */ int reprogram_ac_timer(s_time_t timeout) { - int cpu = smp_processor_id(); s_time_t now; s_time_t expire; u64 apic_tmict; @@ -669,7 +668,8 @@ int reprogram_ac_timer(s_time_t timeout) * cause an immediate interrupt). At least this is guaranteed to hold it * off for ages (esp. since the clock ticks on bus clock, not cpu clock!). 
*/ - if (timeout == 0) { + if ( timeout == 0 ) + { apic_tmict = 0xffffffff; goto reprogram; } @@ -677,10 +677,12 @@ int reprogram_ac_timer(s_time_t timeout) now = NOW(); expire = timeout - now; /* value from now */ - if (expire <= 0) { + if ( expire <= 0 ) + { Dprintk("APICT[%02d] Timeout in the past 0x%08X%08X > 0x%08X%08X\n", - cpu, (u32)(now>>32), (u32)now, (u32)(timeout>>32),(u32)timeout); - return 0; /* timeout value in the past */ + smp_processor_id(), (u32)(now>>32), + (u32)now, (u32)(timeout>>32),(u32)timeout); + return 0; } /* @@ -693,12 +695,15 @@ int reprogram_ac_timer(s_time_t timeout) /* conversion to bus units */ apic_tmict = (((u64)bus_scale) * expire)>>18; - if (apic_tmict >= 0xffffffff) { - Dprintk("APICT[%02d] Timeout value too large\n", cpu); + if ( apic_tmict >= 0xffffffff ) + { + Dprintk("APICT[%02d] Timeout value too large\n", smp_processor_id()); apic_tmict = 0xffffffff; } - if (apic_tmict == 0) { - Dprintk("APICT[%02d] timeout value too small\n", cpu); + + if ( apic_tmict == 0 ) + { + Dprintk("APICT[%02d] timeout value too small\n", smp_processor_id()); return 0; } diff --git a/xen/common/domain.c b/xen/common/domain.c index 85a3c8ae31..597429b357 100644 --- a/xen/common/domain.c +++ b/xen/common/domain.c @@ -127,6 +127,8 @@ void kill_domain_with_errmsg(const char *err) void __kill_domain(struct task_struct *p) { + extern void destroy_event_channels(struct task_struct *); + int i; struct task_struct **pp; unsigned long flags; @@ -149,6 +151,8 @@ void __kill_domain(struct task_struct *p) for ( i = 0; i < MAX_DOMAIN_VIFS; i++ ) unlink_net_vif(p->net_vif_list[i]); + destroy_event_channels(p); + /* * Note this means that find_domain_by_id may fail, even when the caller * holds a reference to the domain being queried. Take care! @@ -467,8 +471,6 @@ unsigned int alloc_new_dom_mem(struct task_struct *p, unsigned int kbytes) /* Release resources belonging to task @p. 
*/ void release_task(struct task_struct *p) { - extern void destroy_event_channels(struct task_struct *); - ASSERT(p->state == TASK_DYING); ASSERT(!p->has_cpu); @@ -481,7 +483,6 @@ void release_task(struct task_struct *p) destroy_blkdev_info(p); /* Free all memory associated with this domain. */ - destroy_event_channels(p); free_page((unsigned long)p->mm.perdomain_pt); UNSHARE_PFN(virt_to_page(p->shared_info)); free_all_dom_mem(p); diff --git a/xen/common/event_channel.c b/xen/common/event_channel.c index 3674f56019..ad668e8684 100644 --- a/xen/common/event_channel.c +++ b/xen/common/event_channel.c @@ -3,7 +3,7 @@ * * Event channels between domains. * - * Copyright (c) 2003, K A Fraser. + * Copyright (c) 2003-2004, K A Fraser. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of @@ -22,135 +22,147 @@ #include #include +#include +#include + #define MAX_EVENT_CHANNELS 1024 -static long event_channel_open(u16 target_dom) +static int get_free_port(struct task_struct *p) { - struct task_struct *lp = current, *rp; - int i, lmax, rmax, lid, rid; - event_channel_t *lchn, *rchn; - shared_info_t *rsi; + int max, port; + event_channel_t *chn; + + max = p->max_event_channel; + chn = p->event_channel; + + for ( port = 0; port < max; port++ ) + if ( chn[port].state == ECS_FREE ) + break; + + if ( port == max ) + { + if ( max == MAX_EVENT_CHANNELS ) + return -ENOSPC; + + max = (max == 0) ? 
4 : (max * 2); + + chn = kmalloc(max * sizeof(event_channel_t), GFP_KERNEL); + if ( unlikely(chn == NULL) ) + return -ENOMEM; + + memset(chn, 0, max * sizeof(event_channel_t)); + + if ( p->event_channel != NULL ) + { + memcpy(chn, p->event_channel, (max/2) * sizeof(event_channel_t)); + kfree(p->event_channel); + } + + p->event_channel = chn; + p->max_event_channel = max; + } + + return port; +} + +static inline unsigned long set_event_pending(struct task_struct *p, int port) +{ + if ( !test_and_set_bit(port, &p->shared_info->event_channel_pend[0]) && + !test_and_set_bit(port>>5, &p->shared_info->event_channel_pend_sel) ) + return mark_guest_event(p, _EVENT_EVTCHN); + return 0; +} + +static inline unsigned long set_event_disc(struct task_struct *p, int port) +{ + if ( !test_and_set_bit(port, &p->shared_info->event_channel_disc[0]) && + !test_and_set_bit(port>>5, &p->shared_info->event_channel_disc_sel) ) + return mark_guest_event(p, _EVENT_EVTCHN); + return 0; +} + +static long event_channel_open(evtchn_open_t *open) +{ + struct task_struct *lp, *rp; + int lport = 0, rport = 0; unsigned long cpu_mask; + domid_t ldom = open->local_dom, rdom = open->remote_dom; long rc = 0; - rp = find_domain_by_id(target_dom); + if ( !IS_PRIV(current) ) + return -EPERM; + + /* 'local_dom' may be DOMID_SELF. 'remote_dom' cannot be.*/ + if ( ldom == DOMID_SELF ) + ldom = current->domain; - /* - * We need locks at both ends to make a connection. We avoid deadlock - * by acquiring the locks in address order. - */ - if ( (unsigned long)lp < (unsigned long)rp ) + /* Event channel must connect distinct domains. */ + if ( ldom == rdom ) + return -EINVAL; + + if ( ((lp = find_domain_by_id(ldom)) == NULL) || + ((rp = find_domain_by_id(rdom)) == NULL) ) + { + if ( lp != NULL ) + put_task_struct(lp); + return -ESRCH; + } + + /* Avoid deadlock by first acquiring lock of domain with smaller id. 
*/ + if ( ldom < rdom ) { spin_lock(&lp->event_channel_lock); spin_lock(&rp->event_channel_lock); } else { - if ( likely(rp != NULL) ) - spin_lock(&rp->event_channel_lock); + spin_lock(&rp->event_channel_lock); spin_lock(&lp->event_channel_lock); } - lmax = lp->max_event_channel; - lchn = lp->event_channel; - lid = -1; - - /* - * Find the first unused event channel. Also ensure bo channel already - * exists to the specified target domain. - */ - for ( i = 0; i < lmax; i++ ) + if ( (lport = get_free_port(lp)) < 0 ) { - if ( (lid == -1) && !(lchn[i].flags & ECF_INUSE) ) - { - lid = i; - } - else if ( unlikely(lchn[i].target_dom == target_dom) ) - { - rc = -EEXIST; - goto out; - } + rc = lport; + goto out; } - - /* If there is no free slot we need to allocate a bigger channel list. */ - if ( unlikely(lid == -1) ) - { - /* Reached maximum channel count? */ - if ( unlikely(lmax == MAX_EVENT_CHANNELS) ) - { - rc = -ENOSPC; - goto out; - } - - lmax = (lmax == 0) ? 4 : (lmax * 2); - - lchn = kmalloc(lmax * sizeof(event_channel_t), GFP_KERNEL); - if ( unlikely(lchn == NULL) ) - { - rc = -ENOMEM; - goto out; - } - memset(lchn, 0, lmax * sizeof(event_channel_t)); - - if ( likely(lp->event_channel != NULL) ) - kfree(lp->event_channel); - - lp->event_channel = lchn; - lp->max_event_channel = lmax; + if ( (rport = get_free_port(rp)) < 0 ) + { + rc = rport; + goto out; } - lchn[lid].target_dom = target_dom; - lchn[lid].flags = ECF_INUSE; + lp->event_channel[lport].remote_dom = rp; + lp->event_channel[lport].remote_port = (u16)rport; + lp->event_channel[lport].state = ECS_CONNECTED; - if ( likely(rp != NULL) ) - { - rchn = rp->event_channel; - rmax = rp->max_event_channel; - - for ( rid = 0; rid < rmax; rid++ ) - { - if ( (rchn[rid].target_dom == lp->domain) && - (rchn[rid].flags & ECF_INUSE) ) - { - /* - * The target was awaiting a connection. We make the connection - * and send a connection-made event to the remote end. 
- */ - rchn[rid].flags = ECF_INUSE | ECF_CONNECTED | lid; - lchn[lid].flags = ECF_INUSE | ECF_CONNECTED | rid; - - rsi = rp->shared_info; - if ( !test_and_set_bit(rid, &rsi->event_channel_pend[0]) && - !test_and_set_bit(rid>>5, &rsi->event_channel_pend_sel) ) - { - cpu_mask = mark_guest_event(rp, _EVENT_EVTCHN); - guest_event_notify(cpu_mask); - } - - break; - } - } - } + rp->event_channel[rport].remote_dom = lp; + rp->event_channel[rport].remote_port = (u16)lport; + rp->event_channel[rport].state = ECS_CONNECTED; + + cpu_mask = set_event_pending(lp, lport); + cpu_mask |= set_event_pending(rp, rport); + guest_event_notify(cpu_mask); out: spin_unlock(&lp->event_channel_lock); - if ( rp != NULL ) - { - spin_unlock(&rp->event_channel_lock); - put_task_struct(rp); - } + spin_unlock(&rp->event_channel_lock); + + put_task_struct(lp); + put_task_struct(rp); + + open->local_port = lport; + open->remote_port = rport; return rc; } -static long event_channel_close(u16 lid) +static long __event_channel_close(struct task_struct *lp, int lport) { - struct task_struct *lp = current, *rp = NULL; + struct task_struct *rp = NULL; event_channel_t *lchn, *rchn; - u16 rid; - shared_info_t *rsi; + int rport; unsigned long cpu_mask; long rc = 0; @@ -159,21 +171,21 @@ static long event_channel_close(u16 lid) lchn = lp->event_channel; - if ( unlikely(lid >= lp->max_event_channel) || - unlikely(!(lchn[lid].flags & ECF_INUSE)) ) + if ( (lport < 0) || (lport >= lp->max_event_channel) || + (lchn[lport].state == ECS_FREE) ) { rc = -EINVAL; goto out; } - if ( lchn[lid].flags & ECF_CONNECTED ) + if ( lchn[lport].state == ECS_CONNECTED ) { if ( rp == NULL ) { - rp = find_domain_by_id(lchn[lid].target_dom); - ASSERT(rp != NULL); - - if ( (unsigned long)lp < (unsigned long)rp ) + rp = lchn[lport].remote_dom; + get_task_struct(rp); + + if ( lp->domain < rp->domain ) { spin_lock(&rp->event_channel_lock); } @@ -184,34 +196,39 @@ static long event_channel_close(u16 lid) goto again; } } - else if ( 
rp->domain != lchn[lid].target_dom ) + else if ( rp != lchn[lport].remote_dom ) { rc = -EINVAL; goto out; } - rchn = rp->event_channel; - rid = lchn[lid].flags & ECF_TARGET_ID; - ASSERT(rid < rp->max_event_channel); - ASSERT(rchn[rid].flags == (ECF_INUSE | ECF_CONNECTED | lid)); - ASSERT(rchn[rid].target_dom == lp->domain); - - rchn[rid].flags = ECF_INUSE; - - rsi = rp->shared_info; - if ( !test_and_set_bit(rid, &rsi->event_channel_disc[0]) && - !test_and_set_bit(rid>>5, &rsi->event_channel_disc_sel) ) - { - cpu_mask = mark_guest_event(rp, _EVENT_EVTCHN); - guest_event_notify(cpu_mask); - } + rchn = rp->event_channel; + rport = lchn[lport].remote_port; + + if ( rport >= rp->max_event_channel ) + BUG(); + if ( rchn[rport].state != ECS_CONNECTED ) + BUG(); + if ( rchn[rport].remote_dom != lp ) + BUG(); + + rchn[rport].state = ECS_ZOMBIE; + rchn[rport].remote_dom = NULL; + rchn[rport].remote_port = 0xFFFF; + + cpu_mask = set_event_disc(lp, lport); + cpu_mask |= set_event_disc(rp, rport); + guest_event_notify(cpu_mask); } - lchn[lid].target_dom = 0; - lchn[lid].flags = 0; + lchn[lport].state = ECS_FREE; + lchn[lport].remote_dom = NULL; + lchn[lport].remote_port = 0xFFFF; out: spin_unlock(&lp->event_channel_lock); + put_task_struct(lp); + if ( rp != NULL ) { spin_unlock(&rp->event_channel_lock); @@ -222,87 +239,135 @@ static long event_channel_close(u16 lid) } -static long event_channel_send(u16 lid) +static long event_channel_close(evtchn_close_t *close) +{ + struct task_struct *lp; + int lport = close->local_port; + long rc; + domid_t ldom = close->local_dom; + + if ( ldom == DOMID_SELF ) + ldom = current->domain; + else if ( !IS_PRIV(current) ) + return -EPERM; + + if ( (lp = find_domain_by_id(ldom)) == NULL ) + return -ESRCH; + + rc = __event_channel_close(lp, lport); + + put_task_struct(lp); + return rc; +} + + +static long event_channel_send(int lport) { struct task_struct *lp = current, *rp; - u16 rid, rdom; - shared_info_t *rsi; + int rport; unsigned long 
cpu_mask; spin_lock(&lp->event_channel_lock); - if ( unlikely(lid >= lp->max_event_channel) || - unlikely(!(lp->event_channel[lid].flags & ECF_CONNECTED)) ) + if ( unlikely(lport < 0) || + unlikely(lport >= lp->max_event_channel) || + unlikely(lp->event_channel[lport].state != ECS_CONNECTED) ) { spin_unlock(&lp->event_channel_lock); return -EINVAL; } - rdom = lp->event_channel[lid].target_dom; - rid = lp->event_channel[lid].flags & ECF_TARGET_ID; + rp = lp->event_channel[lport].remote_dom; + rport = lp->event_channel[lport].remote_port; - spin_unlock(&lp->event_channel_lock); + get_task_struct(rp); - if ( unlikely(rid >= MAX_EVENT_CHANNELS) || - unlikely ((rp = find_domain_by_id(rdom)) == NULL) ) - return -EINVAL; + spin_unlock(&lp->event_channel_lock); - rsi = rp->shared_info; - if ( !test_and_set_bit(rid, &rsi->event_channel_pend[0]) && - !test_and_set_bit(rid>>5, &rsi->event_channel_pend_sel) ) - { - cpu_mask = mark_guest_event(rp, _EVENT_EVTCHN); - guest_event_notify(cpu_mask); - } + cpu_mask = set_event_pending(rp, rport); + guest_event_notify(cpu_mask); put_task_struct(rp); + return 0; } -static long event_channel_status(u16 lid) +static long event_channel_status(evtchn_status_t *status) { - struct task_struct *lp = current; + struct task_struct *lp; + domid_t ldom = status->local_dom; + int lport = status->local_port; event_channel_t *lchn; - long rc = EVTCHNSTAT_closed; + + if ( ldom == DOMID_SELF ) + ldom = current->domain; + else if ( !IS_PRIV(current) ) + return -EPERM; + + if ( (lp = find_domain_by_id(ldom)) == NULL ) + return -ESRCH; spin_lock(&lp->event_channel_lock); lchn = lp->event_channel; - if ( lid < lp->max_event_channel ) + if ( (lport < 0) || (lport >= lp->max_event_channel) ) { - if ( lchn[lid].flags & ECF_CONNECTED ) - rc = EVTCHNSTAT_connected; - else if ( lchn[lid].flags & ECF_INUSE ) - rc = EVTCHNSTAT_disconnected; + spin_unlock(&lp->event_channel_lock); + return -EINVAL; + } + + switch ( lchn[lport].state ) + { + case ECS_FREE: + 
status->status = EVTCHNSTAT_closed; + break; + case ECS_ZOMBIE: + status->status = EVTCHNSTAT_disconnected; + break; + case ECS_CONNECTED: + status->status = EVTCHNSTAT_connected; + status->remote_dom = lchn[lport].remote_dom->domain; + status->remote_port = lchn[lport].remote_port; + break; + default: + BUG(); } spin_unlock(&lp->event_channel_lock); - return rc; + return 0; } -long do_event_channel_op(unsigned int cmd, unsigned int id) +long do_event_channel_op(evtchn_op_t *uop) { long rc; + evtchn_op_t op; + + if ( copy_from_user(&op, uop, sizeof(op)) != 0 ) + return -EFAULT; - switch ( cmd ) + switch ( op.cmd ) { case EVTCHNOP_open: - rc = event_channel_open((u16)id); + rc = event_channel_open(&op.u.open); + if ( copy_to_user(uop, &op, sizeof(op)) != 0 ) + rc = -EFAULT; /* Cleaning up here would be a mess! */ break; case EVTCHNOP_close: - rc = event_channel_close((u16)id); + rc = event_channel_close(&op.u.close); break; case EVTCHNOP_send: - rc = event_channel_send((u16)id); + rc = event_channel_send(op.u.send.local_port); break; case EVTCHNOP_status: - rc = event_channel_status((u16)id); + rc = event_channel_status(&op.u.status); + if ( copy_to_user(uop, &op, sizeof(op)) != 0 ) + rc = -EFAULT; break; default: @@ -320,7 +385,7 @@ void destroy_event_channels(struct task_struct *p) if ( p->event_channel != NULL ) { for ( i = 0; i < p->max_event_channel; i++ ) - (void)event_channel_close((u16)i); + (void)__event_channel_close(p, i); kfree(p->event_channel); } } diff --git a/xen/include/hypervisor-ifs/event_channel.h b/xen/include/hypervisor-ifs/event_channel.h new file mode 100644 index 0000000000..e7c3aa7e1f --- /dev/null +++ b/xen/include/hypervisor-ifs/event_channel.h @@ -0,0 +1,93 @@ +/****************************************************************************** + * event_channel.h + * + * Event channels between domains. + * + * Copyright (c) 2003-2004, K A Fraser. 
+ */ + +#ifndef __HYPERVISOR_IFS__EVENT_CHANNEL_H__ +#define __HYPERVISOR_IFS__EVENT_CHANNEL_H__ + +/* + * EVTCHNOP_open: Open a communication channel between and + * . + * NOTES: + * 1. may be specified as DOMID_SELF. + * 2. Only a sufficiently-privileged domain may create an event channel. + * 3. and are only supplied if the op succeeds. + */ +#define EVTCHNOP_open 0 +typedef struct evtchn_open +{ + /* IN parameters. */ + domid_t local_dom, remote_dom; + /* OUT parameters. */ + int local_port, remote_port; +} evtchn_open_t; + +/* + * EVTCHNOP_close: Close the communication channel which has an endpoint at + * . + * NOTES: + * 1. may be specified as DOMID_SELF. + * 2. Only a sufficiently-privileged domain may close an event channel + * for which is not DOMID_SELF. + */ +#define EVTCHNOP_close 1 +typedef struct evtchn_close +{ + /* IN parameters. */ + domid_t local_dom; + int local_port; + /* No OUT parameters. */ +} evtchn_close_t; + +/* + * EVTCHNOP_send: Send an event to the remote end of the channel whose local + * endpoint is . + */ +#define EVTCHNOP_send 2 +typedef struct evtchn_send +{ + /* IN parameters. */ + int local_port; + /* No OUT parameters. */ +} evtchn_send_t; + +/* + * EVTCHNOP_status: Get the current status of the communication channel which + * has an endpoint at . + * NOTES: + * 1. may be specified as DOMID_SELF. + * 2. Only a sufficiently-privileged domain may obtain the status of an event + * channel for which is not DOMID_SELF. + * 3. is only supplied if status is 'connected'. + */ +#define EVTCHNOP_status 3 /* Get status of . */ +typedef struct evtchn_status +{ + /* IN parameters */ + domid_t local_dom; + int local_port; + /* OUT parameters */ + domid_t remote_dom; + int remote_port; +#define EVTCHNSTAT_closed 0 /* Channel is not in use. */ +#define EVTCHNSTAT_disconnected 1 /* Channel is not connected to remote. */ +#define EVTCHNSTAT_connected 2 /* Channel is connected to remote. 
*/ + int status; +} evtchn_status_t; + +typedef struct evtchn_op +{ + int cmd; /* EVTCHNOP_* */ + union { + evtchn_open_t open; + evtchn_close_t close; + evtchn_send_t send; + evtchn_status_t status; + } u; +} evtchn_op_t; + +#endif /* __HYPERVISOR_IFS__EVENT_CHANNEL_H__ */ diff --git a/xen/include/hypervisor-ifs/hypervisor-if.h b/xen/include/hypervisor-ifs/hypervisor-if.h index 9ee56c04d0..6001bbcdf7 100644 --- a/xen/include/hypervisor-ifs/hypervisor-if.h +++ b/xen/include/hypervisor-ifs/hypervisor-if.h @@ -168,21 +168,6 @@ #define SCHEDOP_exit 3 /* Exit and kill this domain. */ #define SCHEDOP_stop 4 /* Stop executing this domain. */ -/* - * EVTCHNOP_* - Event channel operations. - */ -#define EVTCHNOP_open 0 /* Open channel to . */ -#define EVTCHNOP_close 1 /* Close . */ -#define EVTCHNOP_send 2 /* Send event on . */ -#define EVTCHNOP_status 3 /* Get status of . */ - -/* - * EVTCHNSTAT_* - Non-error return values from EVTCHNOP_status. - */ -#define EVTCHNSTAT_closed 0 /* Chennel is not in use. */ -#define EVTCHNSTAT_disconnected 1 /* Channel is not connected to remote. */ -#define EVTCHNSTAT_connected 2 /* Channel is connected to remote. */ - #ifndef __ASSEMBLY__ diff --git a/xen/include/xeno/sched.h b/xen/include/xeno/sched.h index 4375d9a7be..d5973f3f82 100644 --- a/xen/include/xeno/sched.h +++ b/xen/include/xeno/sched.h @@ -45,13 +45,16 @@ extern struct mm_struct init_mm; #define IS_PRIV(_p) (test_bit(PF_PRIVILEGED, &(_p)->flags)) +struct task_struct; + typedef struct event_channel_st { - u16 target_dom; /* Target domain (i.e. domain at remote end). */ -#define ECF_TARGET_ID ((1<<10)-1) /* Channel identifier at remote end. */ -#define ECF_INUSE (1<<10) /* Is this channel descriptor in use? */ -#define ECF_CONNECTED (1<<11) /* Is this channel connected to remote? */ - u16 flags; + struct task_struct *remote_dom; + u16 remote_port; +#define ECS_FREE 0 /* Available for use. */ +#define ECS_ZOMBIE 1 /* Connection is closed. Remote is disconnected. 
*/ +#define ECS_CONNECTED 2 /* Connected to remote end. */ + u16 state; } event_channel_t; struct task_struct -- 2.30.2